*
* Communication via Xen event channels.
*
- * Copyright (c) 2002-2004, K A Fraser
+ * Copyright (c) 2002-2005, K A Fraser
*
* This file may be distributed separately from the Linux kernel, or
* incorporated into other software packages, subject to the following license:
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
-#define active_evtchns(cpu,sh,idx) \
- ((sh)->evtchn_pending[idx] & \
- cpu_evtchn_mask[cpu][idx] & \
- ~(sh)->evtchn_mask[idx])
+#define active_evtchns(cpu,sh,idx) \
+ ((sh)->evtchn_pending[idx] & \
+ cpu_evtchn_mask[cpu][idx] & \
+ ~(sh)->evtchn_mask[idx])
void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
- clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
- set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
- cpu_evtchn[chn] = cpu;
+ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
+ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
+ cpu_evtchn[chn] = cpu;
}
#else
-#define active_evtchns(cpu,sh,idx) \
- ((sh)->evtchn_pending[idx] & \
- ~(sh)->evtchn_mask[idx])
+#define active_evtchns(cpu,sh,idx) \
+ ((sh)->evtchn_pending[idx] & \
+ ~(sh)->evtchn_mask[idx])
void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
#elif defined (__x86_64__)
#define IRQ_REG orig_rax
#endif
-#define do_IRQ(irq, regs) do { \
- (regs)->IRQ_REG = (irq); \
- do_IRQ((regs)); \
+#define do_IRQ(irq, regs) do { \
+ (regs)->IRQ_REG = (irq); \
+ do_IRQ((regs)); \
} while (0)
#endif
*/
void force_evtchn_callback(void)
{
- (void)HYPERVISOR_xen_version(0, NULL);
+ (void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL(force_evtchn_callback);
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
- u32 l1, l2;
- unsigned int l1i, l2i, port;
- int irq, cpu = smp_processor_id();
- shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
+ u32 l1, l2;
+ unsigned int l1i, l2i, port;
+ int irq, cpu = smp_processor_id();
+ shared_info_t *s = HYPERVISOR_shared_info;
+ vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
- vcpu_info->evtchn_upcall_pending = 0;
+ vcpu_info->evtchn_upcall_pending = 0;
- /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
- l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
- while ( l1 != 0 )
- {
- l1i = __ffs(l1);
- l1 &= ~(1 << l1i);
+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ while (l1 != 0) {
+ l1i = __ffs(l1);
+ l1 &= ~(1 << l1i);
- while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
- {
- l2i = __ffs(l2);
- l2 &= ~(1 << l2i);
+ while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+ l2i = __ffs(l2);
+ l2 &= ~(1 << l2i);
- port = (l1i << 5) + l2i;
- if ( (irq = evtchn_to_irq[port]) != -1 ) {
- do_IRQ(irq, regs);
- } else
- evtchn_device_upcall(port);
- }
- }
+ port = (l1i << 5) + l2i;
+ if ((irq = evtchn_to_irq[port]) != -1)
+ do_IRQ(irq, regs);
+ else
+ evtchn_device_upcall(port);
+ }
+ }
}
EXPORT_SYMBOL(evtchn_do_upcall);
static int find_unbound_irq(void)
{
- int irq;
+ int irq;
- for ( irq = 0; irq < NR_IRQS; irq++ )
- if ( irq_bindcount[irq] == 0 )
- break;
+ for (irq = 0; irq < NR_IRQS; irq++)
+ if (irq_bindcount[irq] == 0)
+ break;
- if ( irq == NR_IRQS )
- panic("No available IRQ to bind to: increase NR_IRQS!\n");
+ if (irq == NR_IRQS)
+ panic("No available IRQ to bind to: increase NR_IRQS!\n");
- return irq;
+ return irq;
}
int bind_virq_to_irq(int virq)
{
- evtchn_op_t op;
- int evtchn, irq;
- int cpu = smp_processor_id();
+ evtchn_op_t op;
+ int evtchn, irq;
+ int cpu = smp_processor_id();
- spin_lock(&irq_mapping_update_lock);
+ spin_lock(&irq_mapping_update_lock);
- if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
- {
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to bind virtual IRQ %d\n", virq);
- evtchn = op.u.bind_virq.port;
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ evtchn = op.u.bind_virq.port;
- irq = find_unbound_irq();
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
- bind_evtchn_to_cpu(evtchn, cpu);
- }
+ bind_evtchn_to_cpu(evtchn, cpu);
+ }
- irq_bindcount[irq]++;
+ irq_bindcount[irq]++;
- spin_unlock(&irq_mapping_update_lock);
+ spin_unlock(&irq_mapping_update_lock);
- return irq;
+ return irq;
}
EXPORT_SYMBOL(bind_virq_to_irq);
void unbind_virq_from_irq(int virq)
{
- evtchn_op_t op;
- int cpu = smp_processor_id();
- int irq = per_cpu(virq_to_irq, cpu)[virq];
- int evtchn = irq_to_evtchn[irq];
-
- spin_lock(&irq_mapping_update_lock);
-
- if ( --irq_bindcount[irq] == 0 )
- {
- op.cmd = EVTCHNOP_close;
- op.u.close.dom = DOMID_SELF;
- op.u.close.port = evtchn;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to unbind virtual IRQ %d\n", virq);
-
- /*
- * This is a slight hack. Interdomain ports can be allocated directly
- * by userspace, and at that point they get bound by Xen to vcpu 0. We
- * therefore need to make sure that if we get an event on an event
- * channel we don't know about vcpu 0 handles it. Binding channels to
- * vcpu 0 when closing them achieves this.
- */
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- per_cpu(virq_to_irq, cpu)[virq] = -1;
- }
-
- spin_unlock(&irq_mapping_update_lock);
+ evtchn_op_t op;
+ int cpu = smp_processor_id();
+ int irq = per_cpu(virq_to_irq, cpu)[virq];
+ int evtchn = irq_to_evtchn[irq];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if (--irq_bindcount[irq] == 0) {
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+
+ /*
+ * This is a slight hack. Interdomain ports can be allocated
+ * directly by userspace, and at that point they get bound by
+ * Xen to vcpu 0. We therefore need to make sure that if we get
+ * an event on an event channel we don't know about vcpu 0
+ * handles it. Binding channels to vcpu 0 when closing them
+ * achieves this.
+ */
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ per_cpu(virq_to_irq, cpu)[virq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_virq_from_irq);
int bind_ipi_to_irq(int ipi)
{
- evtchn_op_t op;
- int evtchn, irq;
- int cpu = smp_processor_id();
+ evtchn_op_t op;
+ int evtchn, irq;
+ int cpu = smp_processor_id();
- spin_lock(&irq_mapping_update_lock);
+ spin_lock(&irq_mapping_update_lock);
- if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
- {
- op.cmd = EVTCHNOP_bind_ipi;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
- evtchn = op.u.bind_ipi.port;
+ if ((evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0) {
+ op.cmd = EVTCHNOP_bind_ipi;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ evtchn = op.u.bind_ipi.port;
- irq = find_unbound_irq();
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
- per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
- bind_evtchn_to_cpu(evtchn, cpu);
- }
- else
- {
- irq = evtchn_to_irq[evtchn];
- }
+ bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ irq = evtchn_to_irq[evtchn];
+ }
- irq_bindcount[irq]++;
+ irq_bindcount[irq]++;
- spin_unlock(&irq_mapping_update_lock);
+ spin_unlock(&irq_mapping_update_lock);
- return irq;
+ return irq;
}
EXPORT_SYMBOL(bind_ipi_to_irq);
void unbind_ipi_from_irq(int ipi)
{
- evtchn_op_t op;
- int cpu = smp_processor_id();
- int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
- int irq = evtchn_to_irq[evtchn];
+ evtchn_op_t op;
+ int cpu = smp_processor_id();
+ int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+ int irq = evtchn_to_irq[evtchn];
- spin_lock(&irq_mapping_update_lock);
+ spin_lock(&irq_mapping_update_lock);
- if ( --irq_bindcount[irq] == 0 )
- {
- op.cmd = EVTCHNOP_close;
- op.u.close.dom = DOMID_SELF;
- op.u.close.port = evtchn;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
+ if (--irq_bindcount[irq] == 0) {
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- /* See comments in unbind_virq_from_irq */
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
- }
+ /* See comments in unbind_virq_from_irq */
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+ }
- spin_unlock(&irq_mapping_update_lock);
+ spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_ipi_from_irq);
int bind_evtchn_to_irq(unsigned int evtchn)
{
- int irq;
+ int irq;
- spin_lock(&irq_mapping_update_lock);
+ spin_lock(&irq_mapping_update_lock);
- if ( (irq = evtchn_to_irq[evtchn]) == -1 )
- {
- irq = find_unbound_irq();
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
- }
+ if ((irq = evtchn_to_irq[evtchn]) == -1) {
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+ }
- irq_bindcount[irq]++;
+ irq_bindcount[irq]++;
- spin_unlock(&irq_mapping_update_lock);
+ spin_unlock(&irq_mapping_update_lock);
- return irq;
+ return irq;
}
EXPORT_SYMBOL(bind_evtchn_to_irq);
void unbind_evtchn_from_irq(unsigned int evtchn)
{
- int irq = evtchn_to_irq[evtchn];
+ int irq = evtchn_to_irq[evtchn];
- spin_lock(&irq_mapping_update_lock);
+ spin_lock(&irq_mapping_update_lock);
- if ( --irq_bindcount[irq] == 0 )
- {
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- }
+ if (--irq_bindcount[irq] == 0) {
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
- spin_unlock(&irq_mapping_update_lock);
+ spin_unlock(&irq_mapping_update_lock);
}
EXPORT_SYMBOL(unbind_evtchn_from_irq);
int bind_evtchn_to_irqhandler(
- unsigned int evtchn,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long irqflags,
- const char *devname,
- void *dev_id)
+ unsigned int evtchn,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
{
- unsigned int irq;
- int retval;
+ unsigned int irq;
+ int retval;
- irq = bind_evtchn_to_irq(evtchn);
- retval = request_irq(irq, handler, irqflags, devname, dev_id);
- if ( retval != 0 )
- unbind_evtchn_from_irq(evtchn);
+ irq = bind_evtchn_to_irq(evtchn);
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (retval != 0)
+ unbind_evtchn_from_irq(evtchn);
- return retval;
+ return retval;
}
EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
{
- unsigned int irq = evtchn_to_irq[evtchn];
- free_irq(irq, dev_id);
- unbind_evtchn_from_irq(evtchn);
+ unsigned int irq = evtchn_to_irq[evtchn];
+ free_irq(irq, dev_id);
+ unbind_evtchn_from_irq(evtchn);
}
EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
- evtchn_op_t op;
- int evtchn;
-
- spin_lock(&irq_mapping_update_lock);
- evtchn = irq_to_evtchn[irq];
- if (!VALID_EVTCHN(evtchn)) {
- spin_unlock(&irq_mapping_update_lock);
- return;
- }
-
- /* Tell Xen to send future instances of this interrupt to other vcpu. */
- op.cmd = EVTCHNOP_bind_vcpu;
- op.u.bind_vcpu.port = evtchn;
- op.u.bind_vcpu.vcpu = tcpu;
-
- /*
- * If this fails, it usually just indicates that we're dealing with a virq
- * or IPI channel, which don't actually need to be rebound. Ignore it,
- * but don't do the xenlinux-level rebind in that case.
- */
- if (HYPERVISOR_event_channel_op(&op) >= 0)
- bind_evtchn_to_cpu(evtchn, tcpu);
-
- spin_unlock(&irq_mapping_update_lock);
-
- /*
- * Now send the new target processor a NOP IPI. When this returns, it
- * will check for any pending interrupts, and so service any that got
- * delivered to the wrong processor by mistake.
- *
- * XXX: The only time this is called with interrupts disabled is from the
- * hotplug/hotunplug path. In that case, all cpus are stopped with
- * interrupts disabled, and the missed interrupts will be picked up when
- * they start again. This is kind of a hack.
- */
- if (!irqs_disabled())
- smp_call_function(do_nothing_function, NULL, 0, 0);
+ evtchn_op_t op;
+ int evtchn;
+
+ spin_lock(&irq_mapping_update_lock);
+ evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn)) {
+ spin_unlock(&irq_mapping_update_lock);
+ return;
+ }
+
+ /* Send future instances of this interrupt to other vcpu. */
+ op.cmd = EVTCHNOP_bind_vcpu;
+ op.u.bind_vcpu.port = evtchn;
+ op.u.bind_vcpu.vcpu = tcpu;
+
+ /*
+ * If this fails, it usually just indicates that we're dealing with a
+ * virq or IPI channel, which don't actually need to be rebound. Ignore
+ * it, but don't do the xenlinux-level rebind in that case.
+ */
+ if (HYPERVISOR_event_channel_op(&op) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ /*
+ * Now send the new target processor a NOP IPI. When this returns, it
+ * will check for any pending interrupts, and so service any that got
+ * delivered to the wrong processor by mistake.
+ *
+ * XXX: The only time this is called with interrupts disabled is from
+ * the hotplug/hotunplug path. In that case, all cpus are stopped with
+ * interrupts disabled, and the missed interrupts will be picked up
+ * when they start again. This is kind of a hack.
+ */
+ if (!irqs_disabled())
+ smp_call_function(do_nothing_function, NULL, 0, 0);
}
static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
- unsigned tcpu = first_cpu(dest);
- rebind_irq_to_cpu(irq, tcpu);
+ unsigned tcpu = first_cpu(dest);
+ rebind_irq_to_cpu(irq, tcpu);
}
/*
static unsigned int startup_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return 0;
- unmask_evtchn(evtchn);
- return 0;
+ if (!VALID_EVTCHN(evtchn))
+ return 0;
+ unmask_evtchn(evtchn);
+ return 0;
}
static void shutdown_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ mask_evtchn(evtchn);
}
static void enable_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- unmask_evtchn(evtchn);
+ unmask_evtchn(evtchn);
}
static void disable_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- mask_evtchn(evtchn);
+ mask_evtchn(evtchn);
}
static void ack_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
}
static void end_dynirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
+ int evtchn = irq_to_evtchn[irq];
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- unmask_evtchn(evtchn);
+ if (!(irq_desc[irq].status & IRQ_DISABLED))
+ unmask_evtchn(evtchn);
}
static struct hw_interrupt_type dynirq_type = {
- "Dynamic-irq",
- startup_dynirq,
- shutdown_dynirq,
- enable_dynirq,
- disable_dynirq,
- ack_dynirq,
- end_dynirq,
- set_affinity_irq
+ "Dynamic-irq",
+ startup_dynirq,
+ shutdown_dynirq,
+ enable_dynirq,
+ disable_dynirq,
+ ack_dynirq,
+ end_dynirq,
+ set_affinity_irq
};
static inline void pirq_unmask_notify(int pirq)
{
- physdev_op_t op;
- if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
- {
- op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
- (void)HYPERVISOR_physdev_op(&op);
- }
+ physdev_op_t op;
+ if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
+ op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+ (void)HYPERVISOR_physdev_op(&op);
+ }
}
static inline void pirq_query_unmask(int pirq)
{
- physdev_op_t op;
- op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
- op.u.irq_status_query.irq = pirq;
- (void)HYPERVISOR_physdev_op(&op);
- clear_bit(pirq, &pirq_needs_unmask_notify[0]);
- if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
- set_bit(pirq, &pirq_needs_unmask_notify[0]);
+ physdev_op_t op;
+ op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+ op.u.irq_status_query.irq = pirq;
+ (void)HYPERVISOR_physdev_op(&op);
+ clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+ if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
+ set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
/*
static unsigned int startup_pirq(unsigned int irq)
{
- evtchn_op_t op;
- int evtchn;
+ evtchn_op_t op;
+ int evtchn;
- op.cmd = EVTCHNOP_bind_pirq;
- op.u.bind_pirq.pirq = irq;
- /* NB. We are happy to share unless we are probing. */
- op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- {
- if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
- printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
- return 0;
- }
- evtchn = op.u.bind_pirq.port;
+ op.cmd = EVTCHNOP_bind_pirq;
+ op.u.bind_pirq.pirq = irq;
+ /* NB. We are happy to share unless we are probing. */
+ op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ if (HYPERVISOR_event_channel_op(&op) != 0) {
+ if (!probing_irq(irq))
+ printk(KERN_INFO "Failed to obtain physical "
+ "IRQ %d\n", irq);
+ return 0;
+ }
+ evtchn = op.u.bind_pirq.port;
- pirq_query_unmask(irq_to_pirq(irq));
+ pirq_query_unmask(irq_to_pirq(irq));
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
- return 0;
+ return 0;
}
static void shutdown_pirq(unsigned int irq)
{
- evtchn_op_t op;
- int evtchn = irq_to_evtchn[irq];
+ evtchn_op_t op;
+ int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
+ if (!VALID_EVTCHN(evtchn))
+ return;
- mask_evtchn(evtchn);
+ mask_evtchn(evtchn);
- op.cmd = EVTCHNOP_close;
- op.u.close.dom = DOMID_SELF;
- op.u.close.port = evtchn;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to unbind physical IRQ %d\n", irq);
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
}
static void enable_pirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
+ int evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
}
static void disable_pirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
+ int evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ mask_evtchn(evtchn);
}
static void ack_pirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
+ int evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
}
static void end_pirq(unsigned int irq)
{
- int evtchn = irq_to_evtchn[irq];
- if ( !VALID_EVTCHN(evtchn) )
- return;
- if ( !(irq_desc[irq].status & IRQ_DISABLED) )
- {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq_to_pirq(irq));
- }
+ int evtchn = irq_to_evtchn[irq];
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ if (!(irq_desc[irq].status & IRQ_DISABLED)) {
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+ }
}
static struct hw_interrupt_type pirq_type = {
- "Phys-irq",
- startup_pirq,
- shutdown_pirq,
- enable_pirq,
- disable_pirq,
- ack_pirq,
- end_pirq,
- set_affinity_irq
+ "Phys-irq",
+ startup_pirq,
+ shutdown_pirq,
+ enable_pirq,
+ disable_pirq,
+ ack_pirq,
+ end_pirq,
+ set_affinity_irq
};
void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
- int evtchn = irq_to_evtchn[i];
- shared_info_t *s = HYPERVISOR_shared_info;
- if ( !VALID_EVTCHN(evtchn) )
- return;
- BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
- synch_set_bit(evtchn, &s->evtchn_pending[0]);
+ int evtchn = irq_to_evtchn[i];
+ shared_info_t *s = HYPERVISOR_shared_info;
+ if (!VALID_EVTCHN(evtchn))
+ return;
+ BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
+ synch_set_bit(evtchn, &s->evtchn_pending[0]);
}
void irq_suspend(void)
{
- int pirq, virq, irq, evtchn;
- int cpu = smp_processor_id(); /* XXX */
+ int pirq, virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
- /* Unbind VIRQs from event channels. */
- for ( virq = 0; virq < NR_VIRQS; virq++ )
- {
- if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
- continue;
- evtchn = irq_to_evtchn[irq];
+ /* Unbind VIRQs from event channels. */
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
+ evtchn = irq_to_evtchn[irq];
- /* Mark the event channel as unused in our table. */
- evtchn_to_irq[evtchn] = -1;
- irq_to_evtchn[irq] = -1;
- }
+ /* Mark the event channel as unused in our table. */
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
- /* Check that no PIRQs are still bound. */
- for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
- if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
- panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
- pirq, evtchn);
+ /* Check that no PIRQs are still bound. */
+ for (pirq = 0; pirq < NR_PIRQS; pirq++)
+ if ((evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1)
+ panic("Suspend attempted while PIRQ %d bound "
+ "to evtchn %d.\n", pirq, evtchn);
}
void irq_resume(void)
{
- evtchn_op_t op;
- int virq, irq, evtchn;
- int cpu = smp_processor_id(); /* XXX */
+ evtchn_op_t op;
+ int virq, irq, evtchn;
+ int cpu = smp_processor_id(); /* XXX */
- for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
- mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
+ /* New event-channel space is not 'live' yet. */
+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+ mask_evtchn(evtchn);
- for ( virq = 0; virq < NR_VIRQS; virq++ )
- {
- if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
- continue;
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
- /* Get a new binding from Xen. */
- op.cmd = EVTCHNOP_bind_virq;
- op.u.bind_virq.virq = virq;
- if ( HYPERVISOR_event_channel_op(&op) != 0 )
- panic("Failed to bind virtual IRQ %d\n", virq);
- evtchn = op.u.bind_virq.port;
+ /* Get a new binding from Xen. */
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
+ evtchn = op.u.bind_virq.port;
- /* Record the new mapping. */
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = irq;
- irq_to_evtchn[irq] = evtchn;
+ /* Record the new mapping. */
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
- /* Ready for use. */
- unmask_evtchn(evtchn);
- }
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
}
void __init init_IRQ(void)
{
- int i;
- int cpu;
+ int i;
+ int cpu;
- irq_ctx_init(0);
+ irq_ctx_init(0);
- spin_lock_init(&irq_mapping_update_lock);
+ spin_lock_init(&irq_mapping_update_lock);
#ifdef CONFIG_SMP
- /* By default all event channels notify CPU#0. */
- memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+ /* By default all event channels notify CPU#0. */
+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
#endif
- for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
- /* No VIRQ -> IRQ mappings. */
- for ( i = 0; i < NR_VIRQS; i++ )
- per_cpu(virq_to_irq, cpu)[i] = -1;
- }
-
- /* No event-channel -> IRQ mappings. */
- for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
- {
- evtchn_to_irq[i] = -1;
- mask_evtchn(i); /* No event channels are 'live' right now. */
- }
-
- /* No IRQ -> event-channel mappings. */
- for ( i = 0; i < NR_IRQS; i++ )
- irq_to_evtchn[i] = -1;
-
- for ( i = 0; i < NR_DYNIRQS; i++ )
- {
- /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- irq_bindcount[dynirq_to_irq(i)] = 0;
-
- irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
- irq_desc[dynirq_to_irq(i)].action = 0;
- irq_desc[dynirq_to_irq(i)].depth = 1;
- irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
- }
-
- for ( i = 0; i < NR_PIRQS; i++ )
- {
- /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
- irq_bindcount[pirq_to_irq(i)] = 1;
-
- irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
- irq_desc[pirq_to_irq(i)].action = 0;
- irq_desc[pirq_to_irq(i)].depth = 1;
- irq_desc[pirq_to_irq(i)].handler = &pirq_type;
- }
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ /* No VIRQ -> IRQ mappings. */
+ for (i = 0; i < NR_VIRQS; i++)
+ per_cpu(virq_to_irq, cpu)[i] = -1;
+ }
+
+ /* No event-channel -> IRQ mappings. */
+ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
+ evtchn_to_irq[i] = -1;
+ mask_evtchn(i); /* No event channels are 'live' right now. */
+ }
+
+ /* No IRQ -> event-channel mappings. */
+ for (i = 0; i < NR_IRQS; i++)
+ irq_to_evtchn[i] = -1;
+
+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+ for (i = 0; i < NR_DYNIRQS; i++) {
+ irq_bindcount[dynirq_to_irq(i)] = 0;
+
+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[dynirq_to_irq(i)].action = 0;
+ irq_desc[dynirq_to_irq(i)].depth = 1;
+ irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+ }
+
+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+ for (i = 0; i < NR_PIRQS; i++) {
+ irq_bindcount[pirq_to_irq(i)] = 1;
+
+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[pirq_to_irq(i)].action = 0;
+ irq_desc[pirq_to_irq(i)].depth = 1;
+ irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+ }
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
#include <asm-xen/evtchn.h>
#include <asm/hypervisor.h>
#include <asm-xen/xen-public/dom0_ops.h>
-#include <asm-xen/queues.h>
#include <asm-xen/xenbus.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
HYPERVISOR_shutdown();
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
int reboot_thru_bios = 0; /* for dmi_scan.c */
EXPORT_SYMBOL(machine_restart);
EXPORT_SYMBOL(machine_halt);
EXPORT_SYMBOL(machine_power_off);
-#endif
/******************************************************************************
static int __do_suspend(void *ignore)
{
- int i, j, k, fpp;
+ int i, j, k, fpp;
#ifdef CONFIG_XEN_USB_FRONTEND
- extern void usbif_resume();
+ extern void usbif_resume();
#else
#define usbif_resume() do{}while(0)
#endif
- extern int gnttab_suspend(void);
- extern int gnttab_resume(void);
+ extern int gnttab_suspend(void);
+ extern int gnttab_resume(void);
- extern void time_suspend(void);
- extern void time_resume(void);
- extern unsigned long max_pfn;
- extern unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[];
+ extern void time_suspend(void);
+ extern void time_resume(void);
+ extern unsigned long max_pfn;
+ extern unsigned long *pfn_to_mfn_frame_list_list;
+ extern unsigned long *pfn_to_mfn_frame_list[];
#ifdef CONFIG_SMP
- extern void smp_suspend(void);
- extern void smp_resume(void);
+ extern void smp_suspend(void);
+ extern void smp_resume(void);
- static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
- cpumask_t prev_online_cpus, prev_present_cpus;
+ static vcpu_guest_context_t suspended_cpu_records[NR_CPUS];
+ cpumask_t prev_online_cpus, prev_present_cpus;
- void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
- int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+ void save_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
+ int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt);
#endif
- extern void xencons_suspend(void);
- extern void xencons_resume(void);
+ extern void xencons_suspend(void);
+ extern void xencons_resume(void);
- int err = 0;
+ int err = 0;
- BUG_ON(smp_processor_id() != 0);
- BUG_ON(in_interrupt());
+ BUG_ON(smp_processor_id() != 0);
+ BUG_ON(in_interrupt());
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
- if (num_online_cpus() > 1) {
- printk(KERN_WARNING
- "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
- return -EOPNOTSUPP;
- }
+ if (num_online_cpus() > 1) {
+ printk(KERN_WARNING "Can't suspend SMP guests "
+ "without CONFIG_HOTPLUG_CPU\n");
+ return -EOPNOTSUPP;
+ }
#endif
- preempt_disable();
+ preempt_disable();
#ifdef CONFIG_SMP
- /* Take all of the other cpus offline. We need to be careful not
- to get preempted between the final test for num_online_cpus()
- == 1 and disabling interrupts, since otherwise userspace could
- bring another cpu online, and then we'd be stuffed. At the
- same time, cpu_down can reschedule, so we need to enable
- preemption while doing that. This kind of sucks, but should be
- correct. */
- /* (We don't need to worry about other cpus bringing stuff up,
- since by the time num_online_cpus() == 1, there aren't any
- other cpus) */
- cpus_clear(prev_online_cpus);
- while (num_online_cpus() > 1) {
- preempt_enable();
- for_each_online_cpu(i) {
- if (i == 0)
- continue;
- err = cpu_down(i);
- if (err != 0) {
- printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
- goto out_reenable_cpus;
- }
- cpu_set(i, prev_online_cpus);
+ /* Take all of the other cpus offline. We need to be careful not
+ to get preempted between the final test for num_online_cpus()
+ == 1 and disabling interrupts, since otherwise userspace could
+ bring another cpu online, and then we'd be stuffed. At the
+ same time, cpu_down can reschedule, so we need to enable
+ preemption while doing that. This kind of sucks, but should be
+ correct. */
+ /* (We don't need to worry about other cpus bringing stuff up,
+ since by the time num_online_cpus() == 1, there aren't any
+ other cpus) */
+ cpus_clear(prev_online_cpus);
+ while (num_online_cpus() > 1) {
+ preempt_enable();
+ for_each_online_cpu(i) {
+ if (i == 0)
+ continue;
+ err = cpu_down(i);
+ if (err != 0) {
+ printk(KERN_CRIT "Failed to take all CPUs "
+ "down: %d.\n", err);
+ goto out_reenable_cpus;
+ }
+ cpu_set(i, prev_online_cpus);
+ }
+ preempt_disable();
}
- preempt_disable();
- }
#endif
- __cli();
+ __cli();
- preempt_enable();
+ preempt_enable();
#ifdef CONFIG_SMP
- cpus_clear(prev_present_cpus);
- for_each_present_cpu(i) {
- if (i == 0)
- continue;
- save_vcpu_context(i, &suspended_cpu_records[i]);
- cpu_set(i, prev_present_cpus);
- }
+ cpus_clear(prev_present_cpus);
+ for_each_present_cpu(i) {
+ if (i == 0)
+ continue;
+ save_vcpu_context(i, &suspended_cpu_records[i]);
+ cpu_set(i, prev_present_cpus);
+ }
#endif
#ifdef __i386__
- mm_pin_all();
- kmem_cache_shrink(pgd_cache);
+ mm_pin_all();
+ kmem_cache_shrink(pgd_cache);
#endif
- time_suspend();
+ time_suspend();
#ifdef CONFIG_SMP
- smp_suspend();
+ smp_suspend();
#endif
- xenbus_suspend();
+ xenbus_suspend();
- xencons_suspend();
+ xencons_suspend();
- irq_suspend();
+ irq_suspend();
- gnttab_suspend();
+ gnttab_suspend();
- HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
- clear_fixmap(FIX_SHARED_INFO);
+ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+ clear_fixmap(FIX_SHARED_INFO);
- xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
- xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
+ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
+ xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
- /* We'll stop somewhere inside this hypercall. When it returns,
- we'll start resuming after the restore. */
- HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+ /* We'll stop somewhere inside this hypercall. When it returns,
+ we'll start resuming after the restore. */
+ HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
- shutting_down = SHUTDOWN_INVALID;
+ shutting_down = SHUTDOWN_INVALID;
- set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
- HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
- memset(empty_zero_page, 0, PAGE_SIZE);
+ memset(empty_zero_page, 0, PAGE_SIZE);
- HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
virt_to_mfn(pfn_to_mfn_frame_list_list);
- fpp = PAGE_SIZE/sizeof(unsigned long);
- for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
- {
- if ( (j % fpp) == 0 )
- {
- k++;
- pfn_to_mfn_frame_list_list[k] =
- virt_to_mfn(pfn_to_mfn_frame_list[k]);
- j=0;
+ fpp = PAGE_SIZE/sizeof(unsigned long);
+ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
+ if ((j % fpp) == 0) {
+ k++;
+ pfn_to_mfn_frame_list_list[k] =
+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
+ j = 0;
+ }
+ pfn_to_mfn_frame_list[k][j] =
+ virt_to_mfn(&phys_to_machine_mapping[i]);
}
- pfn_to_mfn_frame_list[k][j] =
- virt_to_mfn(&phys_to_machine_mapping[i]);
- }
- HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
- gnttab_resume();
+ gnttab_resume();
- irq_resume();
+ irq_resume();
- xencons_resume();
+ xencons_resume();
- xenbus_resume();
+ xenbus_resume();
#ifdef CONFIG_SMP
- smp_resume();
+ smp_resume();
#endif
- time_resume();
+ time_resume();
- usbif_resume();
+ usbif_resume();
#ifdef CONFIG_SMP
- for_each_cpu_mask(i, prev_present_cpus)
- restore_vcpu_context(i, &suspended_cpu_records[i]);
+ for_each_cpu_mask(i, prev_present_cpus)
+ restore_vcpu_context(i, &suspended_cpu_records[i]);
#endif
- __sti();
+ __sti();
#ifdef CONFIG_SMP
out_reenable_cpus:
- for_each_cpu_mask(i, prev_online_cpus) {
- j = cpu_up(i);
- if (j != 0) {
- printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
- i, j);
- err = j;
+ for_each_cpu_mask(i, prev_online_cpus) {
+ j = cpu_up(i);
+ if (j != 0) {
+ printk(KERN_CRIT "Failed to bring cpu "
+ "%d back up (%d).\n",
+ i, j);
+ err = j;
+ }
}
- }
#endif
- return err;
+ return err;
}
static int shutdown_process(void *__unused)
{
- static char *envp[] = { "HOME=/", "TERM=linux",
- "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- static char *restart_argv[] = { "/sbin/reboot", NULL };
- static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
+ static char *envp[] = { "HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+ static char *restart_argv[] = { "/sbin/reboot", NULL };
+ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
+
+ extern asmlinkage long sys_reboot(int magic1, int magic2,
+ unsigned int cmd, void *arg);
+
+ daemonize("shutdown");
+
+ switch (shutting_down) {
+ case SHUTDOWN_POWEROFF:
+ if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_POWER_OFF,
+ NULL);
+ }
+ break;
+
+ case SHUTDOWN_REBOOT:
+ if (execve("/sbin/reboot", restart_argv, envp) < 0) {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_RESTART,
+ NULL);
+ }
+ break;
+ }
- extern asmlinkage long sys_reboot(int magic1, int magic2,
- unsigned int cmd, void *arg);
+ shutting_down = SHUTDOWN_INVALID; /* could try again */
- daemonize(
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- "shutdown"
-#endif
- );
-
- switch ( shutting_down )
- {
- case SHUTDOWN_POWEROFF:
- if ( execve("/sbin/poweroff", poweroff_argv, envp) < 0 )
- {
- sys_reboot(LINUX_REBOOT_MAGIC1,
- LINUX_REBOOT_MAGIC2,
- LINUX_REBOOT_CMD_POWER_OFF,
- NULL);
- }
- break;
-
- case SHUTDOWN_REBOOT:
- if ( execve("/sbin/reboot", restart_argv, envp) < 0 )
- {
- sys_reboot(LINUX_REBOOT_MAGIC1,
- LINUX_REBOOT_MAGIC2,
- LINUX_REBOOT_CMD_RESTART,
- NULL);
- }
- break;
- }
-
- shutting_down = SHUTDOWN_INVALID; /* could try again */
-
- return 0;
+ return 0;
}
static struct task_struct *kthread_create_on_cpu(int (*f)(void *arg),
const char *name,
int cpu)
{
- struct task_struct *p;
- p = kthread_create(f, arg, name);
- kthread_bind(p, cpu);
- wake_up_process(p);
- return p;
+ struct task_struct *p;
+ p = kthread_create(f, arg, name);
+ kthread_bind(p, cpu);
+ wake_up_process(p);
+ return p;
}
static void __shutdown_handler(void *unused)
{
- int err;
-
- if ( shutting_down != SHUTDOWN_SUSPEND )
- {
- err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
- if ( err < 0 )
- printk(KERN_ALERT "Error creating shutdown process!\n");
- }
- else
- {
- kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
- }
+ int err;
+
+ if (shutting_down != SHUTDOWN_SUSPEND) {
+ err = kernel_thread(shutdown_process, NULL,
+ CLONE_FS | CLONE_FILES);
+ if (err < 0)
+ printk(KERN_ALERT "Error creating shutdown "
+ "process!\n");
+ } else {
+ kthread_create_on_cpu(__do_suspend, NULL, "suspender", 0);
+ }
}
static void shutdown_handler(struct xenbus_watch *watch, const char *node)
{
- static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
- char *str;
- int err;
+ static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
+ char *str;
+ int err;
again:
- err = xenbus_transaction_start();
- if (err)
- return;
- str = (char *)xenbus_read("control", "shutdown", NULL);
- /* Ignore read errors and empty reads. */
- if (XENBUS_IS_ERR_READ(str)) {
- xenbus_transaction_end(1);
- return;
- }
-
- xenbus_write("control", "shutdown", "");
-
- err = xenbus_transaction_end(0);
- if (err == -EAGAIN) {
+ err = xenbus_transaction_start();
+ if (err)
+ return;
+ str = (char *)xenbus_read("control", "shutdown", NULL);
+ /* Ignore read errors and empty reads. */
+ if (XENBUS_IS_ERR_READ(str)) {
+ xenbus_transaction_end(1);
+ return;
+ }
+
+ xenbus_write("control", "shutdown", "");
+
+ err = xenbus_transaction_end(0);
+ if (err == -EAGAIN) {
+ kfree(str);
+ goto again;
+ }
+
+ if (strcmp(str, "poweroff") == 0)
+ shutting_down = SHUTDOWN_POWEROFF;
+ else if (strcmp(str, "reboot") == 0)
+ shutting_down = SHUTDOWN_REBOOT;
+ else if (strcmp(str, "suspend") == 0)
+ shutting_down = SHUTDOWN_SUSPEND;
+ else {
+ printk("Ignoring shutdown request: %s\n", str);
+ shutting_down = SHUTDOWN_INVALID;
+ }
+
kfree(str);
- goto again;
- }
-
- if (strcmp(str, "poweroff") == 0)
- shutting_down = SHUTDOWN_POWEROFF;
- else if (strcmp(str, "reboot") == 0)
- shutting_down = SHUTDOWN_REBOOT;
- else if (strcmp(str, "suspend") == 0)
- shutting_down = SHUTDOWN_SUSPEND;
- else {
- printk("Ignoring shutdown request: %s\n", str);
- shutting_down = SHUTDOWN_INVALID;
- }
-
- kfree(str);
-
- if (shutting_down != SHUTDOWN_INVALID)
- schedule_work(&shutdown_work);
+
+ if (shutting_down != SHUTDOWN_INVALID)
+ schedule_work(&shutdown_work);
}
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handler(struct xenbus_watch *watch, const char *node)
{
- char sysrq_key = '\0';
- int err;
+ char sysrq_key = '\0';
+ int err;
again:
- err = xenbus_transaction_start();
- if (err)
- return;
- if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
- printk(KERN_ERR "Unable to read sysrq code in control/sysrq\n");
- xenbus_transaction_end(1);
- return;
- }
-
- if (sysrq_key != '\0')
- xenbus_printf("control", "sysrq", "%c", '\0');
-
- err = xenbus_transaction_end(0);
- if (err == -EAGAIN)
- goto again;
-
- if (sysrq_key != '\0') {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
- handle_sysrq(sysrq_key, NULL, NULL);
-#else
- handle_sysrq(sysrq_key, NULL, NULL, NULL);
-#endif
- }
+ err = xenbus_transaction_start();
+ if (err)
+ return;
+ if (!xenbus_scanf("control", "sysrq", "%c", &sysrq_key)) {
+ printk(KERN_ERR "Unable to read sysrq code in "
+ "control/sysrq\n");
+ xenbus_transaction_end(1);
+ return;
+ }
+
+ if (sysrq_key != '\0')
+ xenbus_printf("control", "sysrq", "%c", '\0');
+
+ err = xenbus_transaction_end(0);
+ if (err == -EAGAIN)
+ goto again;
+
+ if (sysrq_key != '\0') {
+ handle_sysrq(sysrq_key, NULL, NULL);
+ }
}
#endif
static struct xenbus_watch shutdown_watch = {
- .node = "control/shutdown",
- .callback = shutdown_handler
+ .node = "control/shutdown",
+ .callback = shutdown_handler
};
#ifdef CONFIG_MAGIC_SYSRQ
static struct xenbus_watch sysrq_watch = {
- .node ="control/sysrq",
- .callback = sysrq_handler
+ .node = "control/sysrq",
+ .callback = sysrq_handler
};
#endif
unsigned long event,
void *data)
{
- int err1 = 0;
+ int err1 = 0;
#ifdef CONFIG_MAGIC_SYSRQ
- int err2 = 0;
+ int err2 = 0;
#endif
- BUG_ON(down_trylock(&xenbus_lock) == 0);
+ BUG_ON(down_trylock(&xenbus_lock) == 0);
- err1 = register_xenbus_watch(&shutdown_watch);
+ err1 = register_xenbus_watch(&shutdown_watch);
#ifdef CONFIG_MAGIC_SYSRQ
- err2 = register_xenbus_watch(&sysrq_watch);
+ err2 = register_xenbus_watch(&sysrq_watch);
#endif
- if (err1) {
- printk(KERN_ERR "Failed to set shutdown watcher\n");
- }
+ if (err1) {
+ printk(KERN_ERR "Failed to set shutdown watcher\n");
+ }
#ifdef CONFIG_MAGIC_SYSRQ
- if (err2) {
- printk(KERN_ERR "Failed to set sysrq watcher\n");
- }
+ if (err2) {
+ printk(KERN_ERR "Failed to set sysrq watcher\n");
+ }
#endif
- return NOTIFY_DONE;
+ return NOTIFY_DONE;
}
static int __init setup_shutdown_event(void)
{
- xenstore_notifier.notifier_call = setup_shutdown_watcher;
+ xenstore_notifier.notifier_call = setup_shutdown_watcher;
- register_xenstore_notifier(&xenstore_notifier);
+ register_xenstore_notifier(&xenstore_notifier);
- return 0;
+ return 0;
}
subsys_initcall(setup_shutdown_event);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
+#